MCL_QUEUE[MCL_IDX].op = __HYPERVISOR_update_va_mapping;
MCL_QUEUE[MCL_IDX].args[0] = (unsigned long)va;
MCL_QUEUE[MCL_IDX].args[1] = (unsigned long)ma;
- MCL_QUEUE[MCL_IDX].args[2] = UVMF_INVLPG_LOCAL;
+ MCL_QUEUE[MCL_IDX].args[2] = UVMF_INVLPG|UVMF_LOCAL;
mcl_increment_idx();
}
PT_UPDATES_FLUSH();
/* After all PTEs have been zapped we blow away stale TLB entries. */
- xn_rx_mcl[i-1].args[2] = UVMF_TLB_FLUSH_LOCAL;
+ xn_rx_mcl[i-1].args[2] = UVMF_TLB_FLUSH|UVMF_LOCAL;
/* Give away a batch of pages. */
xn_rx_mcl[i].op = __HYPERVISOR_dom_mem_op;
{
#ifdef CONFIG_XEN
if ( likely(vma->vm_mm == current->mm) ) {
- HYPERVISOR_update_va_mapping(address, entry, UVMF_INVLPG_LOCAL);
+ HYPERVISOR_update_va_mapping(address, entry, UVMF_INVLPG|UVMF_LOCAL);
} else {
set_pte(page_table, entry);
flush_tlb_page(vma, address);
{
struct mmuext_op op;
op.cmd = MMUEXT_TLB_FLUSH_MULTI;
- op.cpuset = mask.bits[0];
+ op.cpuset = (void *)mask.bits;
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
{
struct mmuext_op op;
op.cmd = MMUEXT_INVLPG_MULTI;
- op.cpuset = mask.bits[0];
+ op.cpuset = (void *)mask.bits;
op.linear_addr = ptr & PAGE_MASK;
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
mcl[i].args[2] = 0;
}
- mcl[nr_pages-1].args[2] = UVMF_TLB_FLUSH_ALL;
+ mcl[nr_pages-1].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
BUG();
}
mcl->args[3] = DOMID_SELF;
mcl++;
- mcl[-3].args[2] = UVMF_TLB_FLUSH_ALL;
+ mcl[-3].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
BUG();
mcl++;
}
- mcl[-1].args[2] = UVMF_TLB_FLUSH_ALL;
+ mcl[-1].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
BUG();
}
/* After all PTEs have been zapped we blow away stale TLB entries. */
- rx_mcl[i-1].args[2] = UVMF_TLB_FLUSH_ALL;
+ rx_mcl[i-1].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
/* Give away a batch of pages. */
rx_mcl[i].op = __HYPERVISOR_dom_mem_op;
mcl[i].args[2] = 0;
}
- mcl[nr_pages-1].args[2] = UVMF_TLB_FLUSH_ALL;
+ mcl[nr_pages-1].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
BUG();
}
do { \
if (__dirty) { \
if ( likely((__vma)->vm_mm == current->mm) ) { \
- HYPERVISOR_update_va_mapping((__address), (__entry), 0); \
- flush_tlb_page((__vma), (__address)); \
+ HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits)); \
} else { \
xen_l1_entry_update((__ptep), (__entry).pte_low); \
flush_tlb_page((__vma), (__address)); \
xpq_flush_queue();
/* After all PTEs have been zapped we blow away stale TLB entries. */
- rx_mcl[nr_pfns-1].args[2] = UVMF_TLB_FLUSH_LOCAL;
+ rx_mcl[nr_pfns-1].args[2] = UVMF_TLB_FLUSH|UVMF_LOCAL;
/* Give away a batch of pages. */
rx_mcl[nr_pfns].op = __HYPERVISOR_dom_mem_op;
mcl->op = __HYPERVISOR_update_va_mapping;
mcl->args[0] = sc->sc_rx_bufa[rx->id].xb_rx.xbrx_va;
mcl->args[1] = (rx->addr & PG_FRAME) | PG_V|PG_KW;
- mcl->args[2] = UVMF_TLB_FLUSH_LOCAL; // 0;
+ mcl->args[2] = UVMF_TLB_FLUSH|UVMF_LOCAL; // 0;
mcl++;
xpmap_phys_to_machine_mapping
xpq_flush_queue();
/* After all PTEs have been zapped we blow away stale TLB entries. */
- rx_mcl[nr_pfns-1].args[2] = UVMF_TLB_FLUSH_LOCAL;
+ rx_mcl[nr_pfns-1].args[2] = UVMF_TLB_FLUSH|UVMF_LOCAL;
/* Give away a batch of pages. */
rx_mcl[nr_pfns].op = __HYPERVISOR_dom_mem_op;
return okay;
}
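+/*
+ * Convert a bitmap of VCPUs into a bitmap of the physical CPUs those VCPUs
+ * are currently running on.
+ */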
+static inline unsigned long vcpuset_to_pcpuset(
+ struct domain *d, unsigned long vset)
+{
+ unsigned int vcpu;
+ unsigned long pset = 0;
+ struct exec_domain *ed;
+
+ while ( vset != 0 )
+ {
+ vcpu = find_first_set_bit(vset);
+ vset &= ~(1UL << vcpu);
+ if ( (vcpu < MAX_VIRT_CPUS) &&
+ ((ed = d->exec_domain[vcpu]) != NULL) )
+ pset |= 1UL << ed->processor;
+ }
+
+ return pset;
+}
+
int do_mmuext_op(
struct mmuext_op *uops,
unsigned int count,
case MMUEXT_TLB_FLUSH_MULTI:
case MMUEXT_INVLPG_MULTI:
{
- unsigned long inset = op.cpuset, outset = 0;
- while ( inset != 0 )
+ unsigned long vset, pset;
+ if ( unlikely(get_user(vset, (unsigned long *)op.cpuset)) )
{
- unsigned int vcpu = find_first_set_bit(inset);
- inset &= ~(1UL<<vcpu);
- if ( (vcpu < MAX_VIRT_CPUS) &&
- ((ed = d->exec_domain[vcpu]) != NULL) )
- outset |= 1UL << ed->processor;
+ okay = 0;
+ break;
}
+ pset = vcpuset_to_pcpuset(d, vset);
if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
- flush_tlb_mask(outset & d->cpuset);
+ flush_tlb_mask(pset & d->cpuset);
else
- flush_tlb_one_mask(outset & d->cpuset, op.linear_addr);
+ flush_tlb_one_mask(pset & d->cpuset, op.linear_addr);
break;
}
struct exec_domain *ed = current;
struct domain *d = ed->domain;
unsigned int cpu = ed->processor;
+ unsigned long vset, pset, bmap_ptr;
int rc = 0;
perfc_incrc(calls_to_update_va);
cleanup_writable_pagetable(d);
- /*
- * XXX When we make this support 4MB superpages we should also deal with
- * the case of updating L2 entries.
- */
-
if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
mk_l1_pgentry(val))) )
rc = -EINVAL;
if ( unlikely(shadow_mode_enabled(d)) )
update_shadow_va_mapping(va, val, ed, d);
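+ /*
+  * Decode the flush request: the low bits select the flush type, while the
+  * remaining bits are UVMF_LOCAL, UVMF_ALL, or a guest pointer to a bitmap
+  * of VCPUs whose TLBs should be flushed.
+  */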
- switch ( flags & UVMF_FLUSH_MASK )
+ switch ( flags & UVMF_FLUSHTYPE_MASK )
{
- case UVMF_TLB_FLUSH_LOCAL:
- local_flush_tlb();
- percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
- break;
- case UVMF_TLB_FLUSH_ALL:
- flush_tlb_mask(d->cpuset);
- percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
- break;
- case UVMF_INVLPG_LOCAL:
- local_flush_tlb_one(va);
+ case UVMF_TLB_FLUSH:
+ switch ( (bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK) )
+ {
+ case UVMF_LOCAL:
+ local_flush_tlb();
+ break;
+ case UVMF_ALL:
+ flush_tlb_mask(d->cpuset);
+ break;
+ default:
+ if ( unlikely(get_user(vset, (unsigned long *)bmap_ptr)) )
+ rc = -EFAULT;
+ pset = vcpuset_to_pcpuset(d, vset);
+ flush_tlb_mask(pset & d->cpuset);
+ break;
+ }
break;
- case UVMF_INVLPG_ALL:
- flush_tlb_one_mask(d->cpuset, va);
+
+ case UVMF_INVLPG:
+ switch ( (bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK) )
+ {
+ case UVMF_LOCAL:
+ local_flush_tlb_one(va);
+ break;
+ case UVMF_ALL:
+ flush_tlb_one_mask(d->cpuset, va);
+ break;
+ default:
+ if ( unlikely(get_user(vset, (unsigned long *)bmap_ptr)) )
+ rc = -EFAULT;
+ pset = vcpuset_to_pcpuset(d, vset);
+ flush_tlb_one_mask(pset & d->cpuset, va);
+ break;
+ }
break;
}
* linear_addr: Linear address to be flushed from the local TLB.
*
* cmd: MMUEXT_TLB_FLUSH_MULTI
- * cpuset: Set of VCPUs to be flushed.
+ * cpuset: Pointer to bitmap of VCPUs to be flushed.
*
* cmd: MMUEXT_INVLPG_MULTI
* linear_addr: Linear address to be flushed.
- * cpuset: Set of VCPUs to be flushed.
+ * cpuset: Pointer to bitmap of VCPUs to be flushed.
*
* cmd: MMUEXT_TLB_FLUSH_ALL
* No additional arguments. Flushes all VCPUs' TLBs.
/* SET_LDT */
unsigned int nr_ents;
/* TLB_FLUSH_MULTI, INVLPG_MULTI */
- unsigned long cpuset;
+ void *cpuset;
};
};
#endif
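For illustration only (not part of the changeset): because cpuset now carries a guest pointer rather than an inline mask, a caller builds the VCPU bitmap in its own memory and passes its address, much as the guest hunks above do with mask.bits. A minimal sketch, assuming a hypothetical addr variable and a single-word bitmap:

    unsigned long vcpu_bitmap = (1UL << 0) | (1UL << 2);  /* flush VCPUs 0 and 2 */
    struct mmuext_op op;

    op.cmd         = MMUEXT_INVLPG_MULTI;
    op.linear_addr = addr & PAGE_MASK;
    op.cpuset      = (void *)&vcpu_bitmap;  /* hypervisor fetches this via get_user() */
    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);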
/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
-#define UVMF_TLB_FLUSH_LOCAL 1 /* Flush local CPU's TLB. */
-#define UVMF_INVLPG_LOCAL 2 /* Flush VA from local CPU's TLB. */
-#define UVMF_TLB_FLUSH_ALL 3 /* Flush all TLBs. */
-#define UVMF_INVLPG_ALL 4 /* Flush VA from all TLBs. */
-#define UVMF_FLUSH_MASK 7
+/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
+/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
+#define UVMF_NONE (0UL) /* No flushing at all. */
+#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */
+#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */
+#define UVMF_FLUSHTYPE_MASK (3UL<<0)
+#define UVMF_MULTI (0UL<<1) /* Flush subset of TLBs. */
+#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */
+#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */
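For illustration only (not part of the changeset): under the new encoding the flush type occupies the low bits, and the remainder of the flags word is either UVMF_LOCAL, UVMF_ALL, or a pointer to a VCPU bitmap ORed in directly, as the pgtable macro hunk above does with cpu_vm_mask.bits. A minimal sketch, assuming hypothetical va, pte and vcpu_bitmap variables:

    /* Flush va from the local TLB only (UVMF_LOCAL is a NULL bitmap pointer). */
    HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL);

    /* Flush va from the TLBs of the VCPUs named in vcpu_bitmap. */
    HYPERVISOR_update_va_mapping(va, pte,
                                 UVMF_INVLPG | UVMF_MULTI | (unsigned long)&vcpu_bitmap);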
/*
* Commands to HYPERVISOR_sched_op().